#define TIMER_SLOP (50*1000) /* ns */ /* copy from timer.c */
+/* Convert a relative ITC cycle count to nanoseconds (defined with itc_scale). */
+extern u64 cycle_to_ns(u64 cycle);
+
void vtm_set_itm(VCPU *vcpu, uint64_t val)
{
continue_cpu_idle_loop();
}
-void hlt_timer_fn(void *data)
-{
- struct vcpu *v = data;
- if (vcpu_timer_expired(v))
- vcpu_pend_timer(v);
- vcpu_unblock(v);
-}
-
struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
{
struct vcpu *v;
v->arch.breakimm = d->arch.breakimm;
v->arch.last_processor = INVALID_PROCESSOR;
}
- if (!VMX_DOMAIN(v))
- init_timer(&v->arch.hlt_timer, hlt_timer_fn, v, v->processor);
return v;
}
if (v->arch.privregs != NULL)
free_xenheap_pages(v->arch.privregs,
get_order_from_shift(XMAPPEDREGS_SHIFT));
- kill_timer(&v->arch.hlt_timer);
}
free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
}
else {
pal_halt_light_count++;
- set_timer(&v->arch.hlt_timer,
- vcpu_get_next_timer_ns(v));
- do_sched_op_compat(SCHEDOP_block, 0);
+ do_sched_op_compat(SCHEDOP_yield, 0);
}
regs->r8 = 0;
regs->r9 = 0;
unsigned long itc_scale, ns_scale;
unsigned long itc_at_irq;
+/* Convert a relative ITC cycle count to nanoseconds.
+ *
+ * We don't expect an absolute cycle value here, since then there is no
+ * way to prevent overflow for a large numerator.  Normally this
+ * conversion is used for relative offsets only.
+ */
+u64 cycle_to_ns(u64 cycle)
+{
+ return (cycle * itc_scale) >> 32;
+}
+
+/* Inverse conversion: relative nanoseconds to ITC cycles.  The same
+ * caveat as cycle_to_ns applies: callers pass relative offsets, since a
+ * large absolute value would overflow the 64-bit multiply. */
+u64 ns_to_cycle(u64 ns)
+{
+ return (ns * ns_scale) >> 32;
+}
+
static inline u64 get_time_delta(void)
{
s64 delta_itc;
unsigned long old_rsc;
int mode_flags;
fpswa_ret_t fpswa_ret; /* save return values of FPSWA emulation */
- struct timer hlt_timer;
struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
#define INVALID_PROCESSOR INT_MAX
-#ifndef _XEN_IA64_TIME_H
-#define _XEN_IA64_TIME_H
-
#include <asm/linux/time.h>
#include <asm/timex.h>
-
-extern unsigned long itc_scale;
-extern unsigned long ns_scale;
-
-/* We don't expect an absolute cycle value here, since then no way
- * to prevent overflow for large norminator. Normally this conversion
- * is used for relative offset.
- */
-static inline u64
-cycle_to_ns(u64 cycle)
-{
- return (cycle * itc_scale) >> 32;
-}
-
-static inline u64
-ns_to_cycle(u64 ns)
-{
- return (ns * ns_scale) >> 32;
-}
-
-#endif /* _XEN_IA64_TIME_H */
// TODO: Many (or perhaps most) of these should eventually be
// static inline functions
-#include <asm/delay.h>
#include <asm/fpu.h>
#include <asm/tlb.h>
#include <asm/ia64_int.h>
-#include <asm/time.h>
#include <public/arch-ia64.h>
typedef unsigned long UINT64;
typedef unsigned int UINT;
return (~((1UL << itir_ps(itir)) - 1));
}
-static inline u64
-vcpu_get_next_timer_ns(VCPU *vcpu)
-{
- return cycle_to_ns(PSCBX(vcpu, domain_itm) - ia64_get_itc()) + NOW();
-}
-
#define verbose(a...) do {if (vcpu_verbose) printf(a);} while(0)
//#define vcpu_quick_region_check(_tr_regions,_ifa) 1